[llvm][test] Fix filecheck annotation typos [2/n] #95433
Conversation
@llvm/pr-subscribers-llvm-analysis @llvm/pr-subscribers-backend-x86 @llvm/pr-subscribers-backend-amdgpu @llvm/pr-subscribers-backend-webassembly
Author: klensy (klensy)
Changes: blocked on #93673; the actual changes are in the last commit only, the other commits carry over from the previous PR.
Patch is 261.42 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/95433.diff
184 Files Affected:
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll
index a2526d9f5591a..c2aab35194831 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll
@@ -31,7 +31,7 @@ define void @broadcast() #0{
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-; CHECK-NETX: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
%zero = shufflevector <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%1 = shufflevector <vscale x 32 x i8> undef, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll b/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
index a214451bfd3fd..f0a5e9045c3b2 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
@@ -293,7 +293,7 @@ define i32 @store_with_pointer_phi_incoming_phi(ptr %A, ptr %B, ptr %C, i1 %c.0,
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
;
-; CHECK-EMPTY
+; CHECK-EMPTY:
entry:
br label %loop.header
@@ -376,7 +376,7 @@ define i32 @store_with_pointer_phi_incoming_phi_irreducible_cycle(ptr %A, ptr %B
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
;
-; CHECK-EMPTY
+; CHECK-EMPTY:
entry:
br label %loop.header
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll
index 7fd8ac40e4bec..500fcc41dc40c 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll
@@ -1,6 +1,6 @@
; RUN: opt %s -mtriple amdgcn-- -passes='print<uniformity>' -disable-output 2>&1 | FileCheck %s
-; CHECK=LABEL: UniformityInfo for function 'basic':
+; CHECK-LABEL: UniformityInfo for function 'basic':
; CHECK: CYCLES ASSSUMED DIVERGENT:
; CHECK: depth=1: entries(P T) Q
define amdgpu_kernel void @basic(i32 %a, i32 %b, i32 %c) {
@@ -37,7 +37,7 @@ exit:
ret void
}
-; CHECK=LABEL: UniformityInfo for function 'nested':
+; CHECK-LABEL: UniformityInfo for function 'nested':
; CHECK: CYCLES ASSSUMED DIVERGENT:
; CHECK: depth=1: entries(P T) Q A C B
define amdgpu_kernel void @nested(i32 %a, i32 %b, i32 %c) {
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll
index 2a3ff4166213d..4b6fced1d58dd 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll
@@ -1,6 +1,6 @@
; RUN: opt %s -mtriple amdgcn-- -passes='print<uniformity>' -disable-output 2>&1 | FileCheck %s
-; CHECK=LABEL: UniformityInfo for function 'basic':
+; CHECK-LABEL: UniformityInfo for function 'basic':
; CHECK-NOT: CYCLES ASSSUMED DIVERGENT:
; CHECK: CYCLES WITH DIVERGENT EXIT:
; CHECK: depth=1: entries(P T) Q
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll
index feb29497f80c9..6edd6384db7d1 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll
@@ -31,7 +31,7 @@
; at P should not be marked divergent.
define amdgpu_kernel void @nested_irreducible(i32 %a, i32 %b, i32 %c) {
-; CHECK=LABEL: UniformityInfo for function 'nested_irreducible':
+; CHECK-LABEL: UniformityInfo for function 'nested_irreducible':
; CHECK-NOT: CYCLES ASSSUMED DIVERGENT:
; CHECK: CYCLES WITH DIVERGENT EXIT:
; CHECK-DAG: depth=2: entries(P T) R Q
@@ -118,7 +118,7 @@ exit:
; Thus, any PHI at P should not be marked divergent.
define amdgpu_kernel void @header_label_1(i32 %a, i32 %b, i32 %c) {
-; CHECK=LABEL: UniformityInfo for function 'header_label_1':
+; CHECK-LABEL: UniformityInfo for function 'header_label_1':
; CHECK-NOT: CYCLES ASSSUMED DIVERGENT:
; CHECK: CYCLES WITH DIVERGENT EXIT:
; CHECK: depth=1: entries(H) Q P U T R
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll
index 395d7125e3c8d..3015e1326a406 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll
@@ -169,7 +169,7 @@ X:
br label %G
G:
-; C HECK: DIVERGENT: %div.user =
+; CHECK: DIVERGENT: %div.user =
%div.user = add i32 %uni.inc, 5
br i1 %uni.cond, label %G, label %Y
; CHECK: DIVERGENT: %div.user =
diff --git a/llvm/test/Assembler/bfloat.ll b/llvm/test/Assembler/bfloat.ll
index 3a3b4c2b277db..6f935c5dac154 100644
--- a/llvm/test/Assembler/bfloat.ll
+++ b/llvm/test/Assembler/bfloat.ll
@@ -37,25 +37,25 @@ define float @check_bfloat_convert() {
ret float %tmp
}
-; ASSEM-DISASS-LABEL @snan_bfloat
+; ASSEM-DISASS-LABEL: @snan_bfloat
define bfloat @snan_bfloat() {
; ASSEM-DISASS: ret bfloat 0xR7F81
ret bfloat 0xR7F81
}
-; ASSEM-DISASS-LABEL @qnan_bfloat
+; ASSEM-DISASS-LABEL: @qnan_bfloat
define bfloat @qnan_bfloat() {
; ASSEM-DISASS: ret bfloat 0xR7FC0
ret bfloat 0xR7FC0
}
-; ASSEM-DISASS-LABEL @pos_inf_bfloat
+; ASSEM-DISASS-LABEL: @pos_inf_bfloat
define bfloat @pos_inf_bfloat() {
; ASSEM-DISASS: ret bfloat 0xR7F80
ret bfloat 0xR7F80
}
-; ASSEM-DISASS-LABEL @neg_inf_bfloat
+; ASSEM-DISASS-LABEL: @neg_inf_bfloat
define bfloat @neg_inf_bfloat() {
; ASSEM-DISASS: ret bfloat 0xRFF80
ret bfloat 0xRFF80
diff --git a/llvm/test/Bitcode/convergence-control.ll b/llvm/test/Bitcode/convergence-control.ll
index 7ba5609b6a7cc..6988ab029f42a 100644
--- a/llvm/test/Bitcode/convergence-control.ll
+++ b/llvm/test/Bitcode/convergence-control.ll
@@ -18,7 +18,7 @@ B:
C:
; CHECK-LABEL: C:
; CHECK: [[C:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[B]]) ]
- ; CHEC K: call void @f() [ "convergencectrl"(token [[C]]) ]
+ ; CHECK: call void @f() [ "convergencectrl"(token [[C]]) ]
;
%c = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %b) ]
call void @f() [ "convergencectrl"(token %c) ]
diff --git a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
index b2643dc8f9dcb..44071a113a4a0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
@@ -320,8 +320,8 @@ declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.
define %struct.bfloat16x8x3_t @test_vld3q_lane_bf16(ptr %ptr, [3 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
; CHECK-LABEL: test_vld3q_lane_bf16:
; CHECK: // %bb.0: // %entry
-; CHECKT: ld3 { v0.h, v1.h, v2.h }[7], [x0]
-; CHECKT: ret
+; CHECK: ld3 { v0.h, v1.h, v2.h }[7], [x0]
+; CHECK: ret
entry:
%src.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 0
%src.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 1
diff --git a/llvm/test/CodeGen/AArch64/aarch64-mulv.ll b/llvm/test/CodeGen/AArch64/aarch64-mulv.ll
index e11ae9a251590..aa4f374d5d7e7 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mulv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mulv.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=aarch64 -aarch64-enable-sink-fold=true -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc -mtriple=aarch64 -aarch64-enable-sink-fold=true -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-; CHECK_GI: warning: Instruction selection used fallback path for mulv_v3i64
+; CHECK-GI: warning: Instruction selection used fallback path for mulv_v3i64
declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
index 6bcd2f04849b2..d999959bba46f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
@@ -8,7 +8,7 @@ define i32 @fct(i32 %i1, i32 %i2) {
; Sign extension is used more than once, thus it should not be folded.
; CodeGenPrepare is not sharing sext across uses, thus this is folded because
; of that.
-; _CHECK-NOT: , sxtw]
+; CHECK-NOT: , sxtw]
entry:
%idxprom = sext i32 %i1 to i64
%0 = load ptr, ptr @block, align 8
diff --git a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
index 0000262e833da..19b9205dc1786 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
@@ -2,70 +2,70 @@
; RUN: llc -mtriple=arm64_32-apple-ios7.0 -mattr=+outline-atomics -o - %s | FileCheck %s -check-prefix=OUTLINE-ATOMICS
define i8 @test_load_8(ptr %addr) {
-; CHECK-LABAL: test_load_8:
+; CHECK-LABEL: test_load_8:
; CHECK: ldarb w0, [x0]
%val = load atomic i8, ptr %addr seq_cst, align 1
ret i8 %val
}
define i16 @test_load_16(ptr %addr) {
-; CHECK-LABAL: test_load_16:
+; CHECK-LABEL: test_load_16:
; CHECK: ldarh w0, [x0]
%val = load atomic i16, ptr %addr acquire, align 2
ret i16 %val
}
define i32 @test_load_32(ptr %addr) {
-; CHECK-LABAL: test_load_32:
+; CHECK-LABEL: test_load_32:
; CHECK: ldar w0, [x0]
%val = load atomic i32, ptr %addr seq_cst, align 4
ret i32 %val
}
define i64 @test_load_64(ptr %addr) {
-; CHECK-LABAL: test_load_64:
+; CHECK-LABEL: test_load_64:
; CHECK: ldar x0, [x0]
%val = load atomic i64, ptr %addr seq_cst, align 8
ret i64 %val
}
define ptr @test_load_ptr(ptr %addr) {
-; CHECK-LABAL: test_load_ptr:
+; CHECK-LABEL: test_load_ptr:
; CHECK: ldar w0, [x0]
%val = load atomic ptr, ptr %addr seq_cst, align 8
ret ptr %val
}
define void @test_store_8(ptr %addr) {
-; CHECK-LABAL: test_store_8:
+; CHECK-LABEL: test_store_8:
; CHECK: stlrb wzr, [x0]
store atomic i8 0, ptr %addr seq_cst, align 1
ret void
}
define void @test_store_16(ptr %addr) {
-; CHECK-LABAL: test_store_16:
+; CHECK-LABEL: test_store_16:
; CHECK: stlrh wzr, [x0]
store atomic i16 0, ptr %addr seq_cst, align 2
ret void
}
define void @test_store_32(ptr %addr) {
-; CHECK-LABAL: test_store_32:
+; CHECK-LABEL: test_store_32:
; CHECK: stlr wzr, [x0]
store atomic i32 0, ptr %addr seq_cst, align 4
ret void
}
define void @test_store_64(ptr %addr) {
-; CHECK-LABAL: test_store_64:
+; CHECK-LABEL: test_store_64:
; CHECK: stlr xzr, [x0]
store atomic i64 0, ptr %addr seq_cst, align 8
ret void
}
define void @test_store_ptr(ptr %addr) {
-; CHECK-LABAL: test_store_ptr:
+; CHECK-LABEL: test_store_ptr:
; CHECK: stlr wzr, [x0]
store atomic ptr null, ptr %addr seq_cst, align 8
ret void
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
index e9556b9d5cbee..c550a24754c96 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=arm64ec-pc-windows-msvc < %s | FileCheck %s
define void @no_op() nounwind {
-; CHECK-LABEL .def $ientry_thunk$cdecl$v$v;
+; CHECK-LABEL: .def $ientry_thunk$cdecl$v$v;
; CHECK: .section .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$v$v
; CHECK: // %bb.0:
; CHECK-NEXT: stp q6, q7, [sp, #-176]! // 32-byte Folded Spill
diff --git a/llvm/test/CodeGen/AArch64/cxx-tlscc.ll b/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
index 21367aaa8b07f..5a2be8e0e47a9 100644
--- a/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
+++ b/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
@@ -46,7 +46,7 @@ __tls_init.exit:
; CHECK-NOT: stp x20, x19
; FIXME: The splitting logic in the register allocator fails to split along
; control flow here, we used to get this right by accident before...
-; CHECK-NOTXX: stp x14, x13
+; COM: CHECK-NOT: stp x14, x13
; CHECK-NOT: stp x12, x11
; CHECK-NOT: stp x10, x9
; CHECK-NOT: stp x8, x7
@@ -65,7 +65,7 @@ __tls_init.exit:
; CHECK-NOT: ldp x8, x7
; CHECK-NOT: ldp x10, x9
; CHECK-NOT: ldp x12, x11
-; CHECK-NOTXX: ldp x14, x13
+; COM: CHECK-NOT: ldp x14, x13
; CHECK-NOT: ldp x20, x19
; CHECK-NOT: ldp d1, d0
; CHECK-NOT: ldp d3, d2
diff --git a/llvm/test/CodeGen/AArch64/fp16-fmla.ll b/llvm/test/CodeGen/AArch64/fp16-fmla.ll
index a81721afb8453..916fbeb94dcf8 100644
--- a/llvm/test/CodeGen/AArch64/fp16-fmla.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-fmla.ll
@@ -84,11 +84,11 @@ entry:
define <4 x half> @test_FMLAv4i16_indexed_OP1(<4 x half> %a, <4 x i16> %b, <4 x i16> %c) {
; CHECK-LABEL: test_FMLAv4i16_indexed_OP1:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%mul = mul <4 x i16> %c, %b
%m = bitcast <4 x i16> %mul to <4 x half>
@@ -98,11 +98,11 @@ entry:
define <4 x half> @test_FMLAv4i16_indexed_OP2(<4 x half> %a, <4 x i16> %b, <4 x i16> %c) {
; CHECK-LABEL: test_FMLAv4i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%mul = mul <4 x i16> %c, %b
%m = bitcast <4 x i16> %mul to <4 x half>
@@ -112,11 +112,11 @@ entry:
define <8 x half> @test_FMLAv8i16_indexed_OP1(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
; CHECK-LABEL: test_FMLAv8i16_indexed_OP1:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%mul = mul <8 x i16> %c, %b
%m = bitcast <8 x i16> %mul to <8 x half>
@@ -126,11 +126,11 @@ entry:
define <8 x half> @test_FMLAv8i16_indexed_OP2(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
; CHECK-LABEL: test_FMLAv8i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%mul = mul <8 x i16> %c, %b
%m = bitcast <8 x i16> %mul to <8 x half>
@@ -178,11 +178,11 @@ entry:
define <4 x half> @test_FMLSv4i16_indexed_OP2(<4 x half> %a, <4 x i16> %b, <4 x i16> %c) {
; CHECK-LABEL: test_FMLSv4i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fsub
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%mul = mul <4 x i16> %c, %b
%m = bitcast <4 x i16> %mul to <4 x half>
@@ -192,12 +192,12 @@ entry:
define <8 x half> @test_FMLSv8i16_indexed_OP1(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
; CHECK-LABEL: test_FMLSv8i16_indexed_OP1:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fsub
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fneg {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-; CHECK-FIXME: fmla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fneg {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; COM: CHECK: fmla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%mul = mul <8 x i16> %c, %b
%m = bitcast <8 x i16> %mul to <8 x half>
@@ -207,11 +207,11 @@ entry:
define <8 x half> @test_FMLSv8i16_indexed_OP2(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
; CHECK-LABEL: test_FMLSv8i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
; CHECK: mul
; CHECK: fsub
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%mul = mul <8 x i16> %c, %b
%m = bitcast <8 x i16> %mul to <8 x half>
diff --git a/llvm/test/CodeGen/AArch64/fpimm.ll b/llvm/test/CodeGen/AArch64/fpimm.ll
index b92bb4245c7f3..e2944243338f5 100644
--- a/llvm/test/CodeGen/AArch64/fpimm.ll
+++ b/llvm/test/CodeGen/AArch64/fpimm.ll
@@ -38,7 +38,7 @@ define void @check_double() {
; 64-bit ORR followed by MOVK.
; CHECK-DAG: mov [[XFP0:x[0-9]+]], #1082331758844
; CHECK-DAG: movk [[XFP0]], #64764, lsl #16
-; CHECk-DAG: fmov {{d[0-9]+}}, [[XFP0]]
+; CHECK-DAG: fmov {{d[0-9]+}}, [[XFP0]]
%newval3 = fadd double %val, 0xFCFCFC00FC
store volatile double %newval3, ptr @varf64
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
index a5757a70843a9..fa63df35ac857 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
@@ -28,7 +28,7 @@ define void @a() "sign-return-address"="all" {
}
define void @b() "sign-return-address"="non-leaf" {
-; CHECK-LABE: b: // @b
+; CHECK-LABEL: b: // @b
; V8A-NOT: hint #25
; V83A-NOT: paciasp
; CHECK-NOT: .cfi_negate_ra_state
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll b/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
index fedbb642a3620..c589d356e6937 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
@@ -83,11 +83,11 @@ entry:
ret double 0x400921FB54442D18
; CHECK-LABEL: litf:
-; CHECK-DONT: adrp [[ADDR:x[0-9]+]], [[CSTLABEL:.LCP.*]]
-; CHECK-DONT-NEXT: ldr {{d[0-9]+}}, {{[[]}}[[ADDR]], :lo12:[[CSTLABEL]]{{[]]}}
-; CHECK-FUSE: mov [[R:x[0-9]+]], #11544
-; CHECK-FUSE: movk [[R]], #21572, lsl #16
-; CHECK-FUSE: movk [[R]], #8699, lsl #32
-; CHECK-FUSE: movk [[R]], #16393, lsl #48
-; CHECK-FUSE: fmov {{d[0-9]+}}, [[R]]
+; CHECKDONT: adrp [[ADDR:x[0-9]+]], [[CSTLABEL:.LCP.*]]
+; CHECKDONT-NEXT: ldr {{d[0-9]+}}, {{[[]}}[[ADDR]], :lo12:[[CSTLABEL]]{{[]]}}
+; CHECKFUSE: mov [[R:...
[truncated]
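A note on why these one-character typos matter: FileCheck only treats a line as a directive when the check prefix, any suffix (-NEXT, -NOT, -LABEL, -DAG, ...), and the trailing colon all match exactly. A misspelling such as CHECK-NETX: or CHECK-LABAL: (or a missing colon, as in CHECK-EMPTY) is parsed as an ordinary comment, so the intended check is silently skipped and the test keeps passing even if the output regresses. A minimal sketch of the failure mode — the @double_it function and its RUN line are illustrative, not taken from the patch:

; RUN: opt -passes=instcombine %s -S | FileCheck %s
define i32 @double_it(i32 %x) {
; CHECK-LABEL: @double_it(
; The misspelled directive below is invisible to FileCheck: it is read as an
; ordinary comment, so nothing is verified against the output here.
; CHECK-NETX: shl i32 %x, 1
; The corrected spelling is actually enforced on the line after the label match:
; CHECK-NEXT: shl i32 %x, 1
  %r = mul i32 %x, 2
  ret i32 %r
}

The same reasoning applies to the COM: rewrites in the diff above: checks that are known not to hold yet (for example the old CHECK-NOTXX: lines) become COM: CHECK-NOT: ..., and COM: is a comment prefix that FileCheck ignores by design, so the disabled check stays visibly documented instead of hiding behind an accidental typo.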
-; CHECK-FUSE: fmov {{d[0-9]+}}, [[R]]
+; CHECKDONT: adrp [[ADDR:x[0-9]+]], [[CSTLABEL:.LCP.*]]
+; CHECKDONT-NEXT: ldr {{d[0-9]+}}, {{[[]}}[[ADDR]], :lo12:[[CSTLABEL]]{{[]]}}
+; CHECKFUSE: mov [[R:...
[truncated]
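The common thread in these hunks: FileCheck only acts on a directive when a registered prefix, an optional valid suffix, and a colon appear back to back, so misspellings such as CHECK-NETX, CHECK-LABAL, or CHECK=LABEL are treated as ordinary comment text and the assertion silently never runs. A minimal sketch of the failure mode (instruction sequence hypothetical):

before, passes even if ret is not adjacent to mul:
; CHECK: mul
; CHECK-NETX: ret
after, the adjacency constraint is actually enforced:
; CHECK: mul
; CHECK-NEXT: ret

For stale expectations that would start failing once corrected (the fmla/fmls FIXMEs above), the patch instead rewrites them as "COM: CHECK:" lines; COM: is FileCheck's comment directive, so the intended pattern stays visible in the test while remaining inert.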
@llvm/pr-subscribers-backend-arm Author: klensy (klensy) Changes: blocked on #93673, actual changes in last commit only, other ones from previous. Patch is 261.42 KiB, full version: https://github.com/llvm/llvm-project/pull/95433.diff
; GCN-LABEL: {{^}}test_f32_denormals:
-; GCNL: FloatMode: 48
+; GCN: FloatMode: 48
<stdin>:214:3: note: possible intended match here
FloatMode: 240
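That note is FileCheck's nearest-match diagnostic: with the GCNL typo corrected, the directive actually runs, fails to find the literal string, and points at the closest candidate in the input. The surrounding output presumably looks roughly like this (reconstructed; only the note line is quoted from the review):

error: GCN: expected string not found in input
; GCN: FloatMode: 48
<stdin>:214:3: note: possible intended match here
FloatMode: 240

In other words, re-enabling the check exposes a stale expected value (48) against what the compiler now emits (240), so the annotation cannot simply be un-typoed without also updating the value or disabling the line with COM:.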
-#CHECK : name : killFlagSameBlock
+#CHECK: name : killFlagSameBlock
name : killFlagSameBlock
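The same mechanism in miniature: FileCheck requires the colon to follow the prefix (or prefix plus suffix) immediately, so the stray space in "#CHECK :" makes the whole line an inert comment. A sketch of the difference (surrounding MIR test content assumed):

#CHECK : name   <- space before the colon: not a directive, never evaluated
#CHECK: name    <- real directive: matched against the "name : killFlagSameBlock" line quoted above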